//-------------------------------------------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// Page Heap management. Ruthlessly stolen from the C# team. Please notify [....] and [....]
// about any changes to this file. It is likely the change will need to be mirrored in the C#
// sources.
//-------------------------------------------------------------------------------------------------
/* The biggest trickiness with the page heap is that it is inefficient
 * to allocate single pages from the operating system - NT will allocate
 * only on 64K boundaries, so allocating a 4K page directly is needlessly wasteful.
 * We use the ability to reserve and then commit pages to reserve moderately
 * large chunks of memory (a PageArena), then commit pages within the arena.
 * This also allows us to track pages allocated and detect leaks.
 */
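/* A minimal sketch of the reserve-then-commit pattern described above, for
 * illustration only (the sizes and variable names below are placeholders, not
 * part of this heap):
 *
 *     // Reserve a large range of address space with no physical backing yet.
 *     void* base = VirtualAlloc(NULL, 64 * 1024, MEM_RESERVE, PAGE_NOACCESS);
 *
 *     // Later, commit just the pages that are actually needed.
 *     void* page = VirtualAlloc(base, 4096, MEM_COMMIT, PAGE_READWRITE);
 *
 *     // MEM_DECOMMIT returns the physical storage while keeping the reservation.
 *     VirtualFree(page, 4096, MEM_DECOMMIT);
 */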
size_t PageHeap::pageSize;          // The system page size.
int PageHeap::pageShift;            // log2 of the page size.
bool PageHeap::reliableCommit;      // Is MEM_COMMIT reliable?
size_t GetSystemPageSize()
{
    static size_t g_pageSize;

    if (!g_pageSize)
    {
        // Get the system page size.
        SYSTEM_INFO sysinfo;
        GetSystemInfo(&sysinfo);
        g_pageSize = sysinfo.dwPageSize;
    }

    return g_pageSize;
}
void PageHeap::StaticInit()
{
    // First time through -- get system page size.
    if (!PageHeap::pageSize)
    {
        // Get the system page size.
        PageHeap::pageSize = GetSystemPageSize();

        // Determine the page shift.
        int shift = 0;
        size_t size = PageHeap::pageSize;
        while (size > 1)
        {
            size >>= 1;
            shift++;
        }
        PageHeap::pageShift = shift;

        VSASSERT((size_t)(1 << PageHeap::pageShift) == PageHeap::pageSize, "Invalid");

        OSVERSIONINFO osvi;
        osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
        BOOL ok = GetVersionEx(&osvi);
        VSASSERT(ok, "Invalid");
        reliableCommit = ok && osvi.dwMajorVersion >= 5;
    }
}
void PageHeap::PageArena::FreeAddressSpace()
{
    VirtualFree(pages, 0, MEM_RELEASE);
    pages = NULL;
}
bool PageHeap::PageArena::HasUsedPages() const
{
    for (unsigned int i = 0; i < PAGES_PER_ARENA / BITS_DWORD; i++)
    {
        if (used[i] != 0)
            return true;
    }

    return false;
}
PageHeap::PageHeap() :
    arenaList(NULL),
    arenaLast(NULL),
    m_pageCurUse(0),
    m_pageMaxUse(0),
    m_pageCurReserve(0),
    m_pageMaxReserve(0),
    singlePageArenaList(NULL),
    singlePageArenaLast(NULL),
    whatIsProtected(ProtectedEntityFlags::Nothing)
{
    CTinyGate gate (&lock); // Acquire the lock
}
/*
 * Destructor. Free everything.
 */
PageHeap::~PageHeap()
{
    CTinyGate gate (&lock); // Acquire the lock
}
void PageHeap::ShrinkUnusedResources()
{
    // [....] The ordering of these calls is important. First free all possible
    // pages. That may leave arenas entirely unused, which are harvested by the second call.
    DecommitUnusedPages();
    FreeUnusedArenas();

    // See where our remaining pages were allocated from.
    long count = 0;
    long totmem = 1; // for thousands of allocators this is very expensive on an idle thread, so just get totals
    str = g_NrlsAllocTracker->GetAllocatorStatusReport(&count, &totmem);
}
// [....] Search a segment of pages in an arena for cPages of contiguous free pages.
// Returns the index of the first page of the run, or -1 if no such run exists in the range.
int PageHeap::PageArena::LookForPages(unsigned int cPages, int indexPageBegin, int indexLastValidPage)
{
    unsigned int cPagesRemaining = cPages;

    while (indexPageBegin <= indexLastValidPage)
    {
        // Scan to see if cPages pages starting at indexPageBegin are not in use.
        if (!(indexPageBegin & DWORD_BIT_MASK))
        {
            // The current page is on a DWORD boundary, use an optimized check.
            // Search this area for free pages. First, find a dword that isn't all used.
            // This loop quickly skips (32 at a time) the used pages.
            int dwIndexByDword;
            for (dwIndexByDword = indexPageBegin / BITS_DWORD;
                 dwIndexByDword <= indexLastValidPage / BITS_DWORD;
                 ++dwIndexByDword)
            {
                if (used[dwIndexByDword] != 0xFFFFFFFF)
                {
                    break; // not all used.
                }

                // All of these pages are used, reset the counter.
                cPagesRemaining = cPages;
            }

            indexPageBegin = dwIndexByDword * BITS_DWORD;

            // Did the loop take us beyond the last valid page? If so,
            // this allocation request can't be fulfilled from this range.
            if (indexPageBegin > indexLastValidPage)
                return -1;
        }

        if (IsPageUsed(indexPageBegin))
            cPagesRemaining = cPages;   // the run was interrupted, start counting again
        else
            --cPagesRemaining;

        ++indexPageBegin;

        if (!cPagesRemaining)
            return indexPageBegin - cPages;
    }

    return -1;
}
void * PageHeap::PageArena::AllocPagesHelper(int iPage, unsigned int cPages, PageHeap & parent)
{
    size_t cBytes = cPages << pageShift;
    void* p = (BYTE *)pages + (iPage << pageShift); // Calculate address of allocation.
    bool allCommitted = true;

    for (unsigned i = 0; i < cPages && allCommitted; i++)
    {
        if (! (IsPageCommitted(iPage + i)))
            allCommitted = false;
    }

    // Commit the pages from the OS if needed.
    if (!allCommitted)
    {
        if (VirtualAlloc(p, cBytes, MEM_COMMIT, PAGE_READWRITE) != p)
        {
            // If the system couldn't commit the pages then we're in
            // trouble. Presumably it is out of physical memory. Here we
            // make a last-ditch effort to continue by freeing up unused
            // pages in the hope that we'll then be able to commit.
            if (!parent.DecommitUnusedPages() ||
                VirtualAlloc(p, cBytes, MEM_COMMIT, PAGE_READWRITE) != p)
            {
                VbThrow(GetLastHResultError());
            }
        }
    }

    if (!reliableCommit || allCommitted)
    {
        // On Win9X the above call to VirtualAlloc does not leave the memory writeable.
        PageProtect::AllowWrite(ProtectedEntityFlags::UnusedMemory, p, cBytes);
    }

    // Mark the pages as in use and committed.
    unsigned int c = cPages;
    while (c)
    {
        VSASSERT(! (IsPageUsed(iPage)), "Invalid");
        used[iPage >> DWORD_BIT_SHIFT] |= (1 << (iPage & DWORD_BIT_MASK));
        MarkPageCommitted(iPage);
        ++iPage;
        --c;
    }

    // Make sure the pages aren't zero filled.
    memset(p, 0xCC, cBytes);

    return p;
}
void* PageHeap::PageArena::AllocPages(unsigned int cPages, PageHeap & parent)
{
    // [....] If SPREAD_ALLOCATIONS is defined, minimize the reuse of address space.
    // Do this so that pointers to address space that has been allocated
    // and then freed are more likely to point to either decommitted or protected pages.
    // Otherwise, if we continue to reuse the same address space, dangling pointers
    // may well point to accessible memory, allowing reads and writes
    // through them to bogus data.

    unsigned int iWhereToBeginPageSearch =
#ifdef SPREAD_ALLOCATIONS
        m_iStartNextAlloc; // begin search at page after last allocation
#else
        0;
#endif

    int iPage = LookForPages((unsigned int)cPages, iWhereToBeginPageSearch, PAGES_PER_ARENA - 1);

#ifdef SPREAD_ALLOCATIONS
    if (iPage < 0)
    {
        // The previous search began at position N, at which point cPages - 1 may have
        // been free, but the next page was used and therefore the search didn't
        // succeed. Therefore look from the beginning to N + cPages - 1 so as not to
        // leave a potential hole of cPages - 1.
        iPage = LookForPages((unsigned int)cPages, 0,
            min(m_iStartNextAlloc + (unsigned int)cPages - 1, PAGES_PER_ARENA - 1));
    }
#endif

    if (iPage < 0)
        return NULL;

#ifdef SPREAD_ALLOCATIONS
    // Success, we found contiguous free pages.
    m_iStartNextAlloc = iPage + cPages;
#endif

    void* p = AllocPagesHelper(iPage, cPages, parent);

    // Return the address of the allocated pages.
    return p;
}
/*
 * Allocate a set of pages from the page heap. Memory is not
 * zero-filled; the size must be a multiple of the page size.
 */
void * PageHeap::AllocPages(size_t sz)
{
    CTinyGate gate (&lock); // Acquire the lock

    VSASSERT(sz % pageSize == 0 && sz != 0, "Invalid"); // must be page size multiple.

    m_pageCurUse += sz / pageSize;
    if (m_pageCurUse > m_pageMaxUse)
        m_pageMaxUse = m_pageCurUse;

    // How many pages are being allocated?
    size_t cPages = (sz >> pageShift);

    // Handle single page alloc requests in an optimized fashion.
    // This case is taken >99% of the time.
    if (cPages == 1)
    {
        return SinglePageAlloc();
    }

    // Handle very large allocations differently.
    if (sz > BIGALLOC_SIZE)
    {
        return LargeAlloc(sz);
    }

    PageArena * arena;
    void * p;

    // Check each arena in turn for enough contiguous pages.
    for (arena = arenaList; arena != NULL; arena = arena->nextArena)
    {
        if (arena->type == LargeAllocation)
            continue; // Large allocation arenas are not interesting.

        if (p = arena->AllocPages((unsigned int)cPages, *this))
            return p;
    }

    // No arenas have enough free space. Create a new arena and allocate
    // at the beginning of that arena.
    arena = CreateArena(Normal, PAGES_PER_ARENA * pageSize);

    p = arena->AllocPages((unsigned int)cPages, *this);

    return p;
}
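// Illustrative usage only (heap, cbWanted and the entity flag below are placeholders,
// not part of this file): callers round their request up to a whole number of pages
// and must later free with exactly the same size.
//
//     size_t cb = ((cbWanted + PageHeap::pageSize - 1) / PageHeap::pageSize) * PageHeap::pageSize;
//     void * pv = heap.AllocPages(cb);                         // not zero-filled
//     ...
//     heap.FreePages(ProtectedEntityFlags::Nothing, pv, cb);   // same size as allocated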
template <typename T>
void PageHeap::RemoveArena (const T * goingAway, T *& containingArenaList, T *& containingArenaListLast)
{
    TemplateUtil::CompileAssertIsChildOf<PageArena,T>();

    // Remove the arena from the arena list.
    if (containingArenaList == goingAway)
    {
        containingArenaList = (T *) goingAway->nextArena;
        if (containingArenaListLast == goingAway)
            containingArenaListLast = NULL;
    }
    else
    {
        T * arenaPrev;

        // Find the arena just before the one we want to remove.
        for (arenaPrev = containingArenaList; arenaPrev->nextArena != goingAway; arenaPrev = (T *) arenaPrev->nextArena)
            ;

        VSASSERT(arenaPrev->nextArena == goingAway, "Invalid");
        arenaPrev->nextArena = (T *) goingAway->nextArena;
        if (containingArenaListLast == goingAway)
            containingArenaListLast = arenaPrev;
    }
}
void PageHeap::FreeUnusedArenas()
{
    CTinyGate gate (&lock);

    PageArena * nextArena = NULL;

    for (PageArena * arena = arenaList; arena != NULL; arena = nextArena)
    {
        nextArena = arena->nextArena;

        if (arena->type == LargeAllocation)
            continue;

        if (!arena->HasUsedPages())
        {
            RemoveArena(arena, arenaList, arenaLast);
            size_t addressSpace = arena->GetAddressSpaceSize();
            arena->FreeAddressSpace();
            m_pageCurReserve -= addressSpace / pageSize;
            delete arena;
        }
    }

    // The only arenas that may potentially be freed are in the queue of arenas with free pages.
    int size = static_cast<int>(singlePageArenasWithFreePages.size());
    for (int i = 0; i < size; i++)
    {
        SinglePageArena * arena = singlePageArenasWithFreePages.front();
        singlePageArenasWithFreePages.pop();

        if (arena->NumberOfFreePagesAvailable() == PAGES_PER_ARENA) // all the pages are free
        {
            // Unlink from list and delete the arena.
            addressToSinglePageArenaMap.erase(arena->pages);
            RemoveArena(arena, singlePageArenaList, singlePageArenaLast);
            size_t addressSpace = arena->GetAddressSpaceSize();
            arena->FreeAddressSpace(); // sets arena->pages = NULL
            m_pageCurReserve -= addressSpace / pageSize;
            delete arena;
            continue;
        }

        singlePageArenasWithFreePages.push(arena); // add the arena back to the queue if we didn't delete it
    }
}
/*
 * Free pages back to the page heap. The size must be the
 * same as when allocated.
 */
void PageHeap::FreePages(ProtectedEntityFlagsEnum entity, _Post_invalid_ void * p, size_t sz)
{
    CTinyGate gate (&lock); // Acquire the lock

    VSASSERT(sz % pageSize == 0 && sz != 0, "Invalid"); // must be page size multiple.

    m_pageCurUse -= sz / pageSize;

    size_t cPages = (sz >> pageShift);

    // Handle single page frees in an optimized fashion.
    if (cPages == 1)
    {
        SinglePageFree(entity, p);
        return;
    }

    // Handle very large allocations differently.
    if (sz > BIGALLOC_SIZE)
    {
        LargeFree(p, sz);
        return;
    }

    // Find the arena this page is in.
    PageArena * arena = FindArena(p);
    VSASSERT(arena, "Invalid");

    FreePagesHelper(entity, arena, p, sz);
}
void PageHeap::FreePagesHelper(ProtectedEntityFlagsEnum entity, PageArena * arena, void * p, size_t sz)
{
    size_t cPages = (sz >> pageShift);

    // Get page index within this arena, and page count.
    size_t initialPageIndex = ((BYTE *)p - (BYTE *)arena->pages) >> pageShift;

    // Pages must be in-use and committed. Set the pages to not-in-use. We could
    // decommit the pages here, but it is more efficient to keep them around
    // committed because we'll probably want them again. To actually decommit
    // them, call PageHeap::DecommitUnusedPages().
    size_t iPage = initialPageIndex;
    while (cPages)
    {
        VSASSERT(arena->used[iPage >> DWORD_BIT_SHIFT] & (1 << (iPage & DWORD_BIT_MASK)), "Invalid");
        VSASSERT(arena->committed[iPage >> DWORD_BIT_SHIFT] & (1 << (iPage & DWORD_BIT_MASK)), "Invalid");

        arena->used[iPage >> DWORD_BIT_SHIFT] &= ~(1 << (iPage & DWORD_BIT_MASK));
        ++iPage;
        --cPages;
    }

#ifdef DECOMMIT_ON_FREE
    // Decommit the pages immediately rather than waiting for DecommitUnusedPages.
    iPage = initialPageIndex;
    BOOL b = VirtualFree((BYTE *)arena->pages + (iPage << pageShift), sz, MEM_DECOMMIT);
    ASSERT(b); // [....] throw VcsException?
    size_t cPgs = sz >> pageShift;
    while (cPgs--)
    {
        arena->ClearPageCommitted((unsigned int)iPage);
        ++iPage;
    }
#else
    PageProtect::AllowWrite(entity, p, sz);

    // Fill pages with junk to indicate they are unused.
    memset(p, 0xCC, sz);

    if (PageProtect::IsEntityProtected(ProtectedEntityFlags::UnusedMemory))
    {
        PageProtect::ForbidAccess(ProtectedEntityFlags::UnusedMemory, p, sz);
    }
    else
    {
        PageProtect::AllowWrite(entity, p, sz);
    }
#endif
}
/////////////////////////////////////////////////////////////////////////////////
// Allocate a very large allocation. An entire arena is allocated for the allocation.

void* PageHeap::LargeAlloc(size_t sz)
{
    CTinyGate gate (&lock); // Acquire the lock

    // Create an arena for this large allocation.
    PageArena * newArena = CreateArena(LargeAllocation, sz);

    // Make sure the pages aren't zero filled.
    memset(newArena->pages, 0xCC, sz);

    return newArena->pages;
}
/*
 * Free a large allocation made via LargeAlloc.
 */
void PageHeap::LargeFree(void * p, size_t sz)
{
    // Find the arena corresponding to this large allocation.
    CTinyGate gate (&lock); // Acquire the lock
    PageArena * arena = FindArena(p);
    VSASSERT(arena && arena->type == LargeAllocation && arena->pages == p && arena->size == sz, "Invalid");

    m_pageCurReserve -= sz / pageSize;

    // Free the pages back to the operating system.
    BOOL b;
    b = VirtualFree(p, 0, MEM_RELEASE);
    VSASSERT(b, "Invalid");

    // Remove the arena from the arena list.
    if (arenaList == arena)
    {
        arenaList = arena->nextArena;
        if (arenaLast == arena)
            arenaLast = NULL;
    }
    else
    {
        PageArena * arenaPrev;

        // Find the arena just before the one we want to remove.
        for (arenaPrev = arenaList; arenaPrev->nextArena != arena; arenaPrev = arenaPrev->nextArena)
            ;

        VSASSERT(arenaPrev->nextArena == arena, "Invalid");
        arenaPrev->nextArena = arena->nextArena;
        if (arenaLast == arena)
            arenaLast = arenaPrev;
    }

    // Free the arena structure.
    delete arena;
}
/////////////////////////////////////////////////////////////////////////////////
// Allocate a single page allocation.

void* PageHeap::SinglePageAlloc()
{
    SinglePageArena * arena;
    void * p;

    if (!singlePageArenasWithFreePages.empty())
    {
        // Any arena will work here; taking the first one each time is probably best for locality.
        arena = singlePageArenasWithFreePages.front();

        // Pop the top free page from our stack.
        int iPage = arena->freePageStack[arena->topOfFreePageStack];
        arena->topOfFreePageStack--;

        // Remove the arena from the singlePageArenasWithFreePages queue if we just used the last page.
        if (arena->NumberOfFreePagesAvailable() == 0)
        {
            singlePageArenasWithFreePages.pop();
        }

        p = arena->AllocPagesHelper(iPage, 1, *this);

        // Return the address of the allocated page.
        return p;
    }

    // No arenas have enough free space. Create a new arena and allocate
    // at the beginning of that arena.
    arena = (SinglePageArena *) CreateArena(SinglePageAllocation, PAGES_PER_ARENA * pageSize);

    int iPage = arena->freePageStack[arena->topOfFreePageStack];
    arena->topOfFreePageStack--;

    p = arena->AllocPagesHelper(iPage, 1, *this);

    return p;
}
/*
 * Free a single page allocation made via SinglePageAlloc.
 */
void PageHeap::SinglePageFree(ProtectedEntityFlagsEnum entity, _Post_invalid_ void * p)
{
    CTinyGate gate (&lock); // Acquire the lock

    // Find the arena corresponding to the page being freed.
    // Get the first arena whose starting memory address is greater than p, then go back one arena because
    // p belongs to the closest arena whose first page is <= p, and upper_bound returns the first arena whose
    // page is strictly greater than p.
    SinglePageArena * arena = (--addressToSinglePageArenaMap.upper_bound(p))->second;
    VSASSERT(arena && arena->size == PAGES_PER_ARENA * pageSize && arena->OwnsPage(p), "Invalid");

    // Mark the page as freed.
    FreePagesHelper(entity, arena, p, pageSize);

    // Push the page back onto our free page stack.
    int iPage = (int) ((BYTE *)p - (BYTE *)arena->pages) >> pageShift;
    ++arena->topOfFreePageStack;
    VSASSERT(arena->topOfFreePageStack < PAGES_PER_ARENA, "too many pages available");
    arena->freePageStack[arena->topOfFreePageStack] = iPage;

    // Add this arena back to our free queue if we were full but now have a single free page.
    if (arena->NumberOfFreePagesAvailable() == 1)
    {
        singlePageArenasWithFreePages.push(arena);
    }
}
void FreeArenaList(PageHeap::PageArena * list, bool checkLeaks)
{
    PageHeap::PageArena * arena, *nextArena;

    for (arena = list; arena != NULL; arena = nextArena)
    {
        nextArena = arena->nextArena;

        // Check arena for leaks, if desired.
        if (checkLeaks)
        {
            VSASSERT(arena->type != PageHeap::LargeAllocation, "Invalid"); // Large allocations should have been freed by now.

            for (int dwIndex = 0; dwIndex < PAGES_PER_ARENA / BITS_DWORD; ++dwIndex)
            {
                VSASSERT(arena->used[dwIndex] == 0, "Invalid"); // All pages in this arena should be free.
            }
        }

        // Free the pages in the arena.
        BOOL b;
        b = VirtualFree(arena->pages, 0, MEM_RELEASE);
        VSASSERT(b, "Invalid");

        // Free the arena structure.
        delete arena;
    }
}
666 // Free the arena structure.
672 * Free everything allocated by the page heap; optionally checking for
673 * leak (memory that hasn't been freed via FreePages).
675 void PageHeap::FreeAllPages(bool checkLeaks
)
677 CTinyGate
gate (&lock
); // Acquire the lock
679 FreeArenaList(arenaList
, checkLeaks
);
680 FreeArenaList(singlePageArenaList
, checkLeaks
);
682 m_pageCurUse
= m_pageCurReserve
= 0;
683 arenaList
= arenaLast
= NULL
;
684 singlePageArenaList
= singlePageArenaLast
= NULL
;
686 addressToSinglePageArenaMap
.clear();
687 singlePageArenasWithFreePages
= std::queue
<SinglePageArena
*>();
bool PageHeap::DecommitUnusedPagesFromArenaList(PageArena * list)
{
    bool anyDecommitted = false;

    PageArena * arena;
    BOOL b;

    for (arena = list; arena != NULL; arena = arena->nextArena)
    {
        if (arena->type == LargeAllocation)
            continue;

        for (int dwIndex = 0; dwIndex < PAGES_PER_ARENA / BITS_DWORD; ++dwIndex)
        {
            // Can we decommit 32 pages at once with one OS call?
            if (arena->used[dwIndex] == 0 && arena->committed[dwIndex] != 0)
            {
#pragma warning (push)
#pragma warning (disable: 6250)
                b = VirtualFree((BYTE *)arena->pages + ((dwIndex * BITS_DWORD) << pageShift),
                                BITS_DWORD << pageShift,
                                MEM_DECOMMIT);
#pragma warning (pop)
                VSASSERT(b, "Invalid");

                anyDecommitted = true;
                arena->committed[dwIndex] = 0;
            }
            else if (arena->used[dwIndex] != arena->committed[dwIndex])
            {
                // Some pages in this group should be decommitted. Check each one individually.
                for (int iPage = dwIndex * BITS_DWORD; iPage < (dwIndex + 1) * BITS_DWORD; ++iPage)
                {
                    if ( ! (arena->used[iPage >> DWORD_BIT_SHIFT] & (1 << (iPage & DWORD_BIT_MASK))) &&
                           (arena->committed[iPage >> DWORD_BIT_SHIFT] & (1 << (iPage & DWORD_BIT_MASK))))
                    {
#pragma warning (push)
#pragma warning (disable: 6250)
                        b = VirtualFree((BYTE *)arena->pages + (iPage << pageShift), pageSize, MEM_DECOMMIT);
#pragma warning (pop)
                        VSASSERT(b, "Invalid");

                        anyDecommitted = true;
                        arena->committed[iPage >> DWORD_BIT_SHIFT] &= ~(1 << (iPage & DWORD_BIT_MASK));
                    }
                }
            }
        }

        // At this point, the only committed pages in this arena should be in use.
        for (int dwIndex = 0; dwIndex < PAGES_PER_ARENA / BITS_DWORD; ++dwIndex)
        {
            VSASSERT(arena->used[dwIndex] == arena->committed[dwIndex], "Invalid");
        }
    }

    return anyDecommitted;
}
/*
 * Decommit any pages that aren't in use, so the memory
 * can be profitably used by other parts of the system.
 */
bool PageHeap::DecommitUnusedPages()
{
    CTinyGate gate (&lock); // Acquire the lock

    bool anyDecommitted = DecommitUnusedPagesFromArenaList(arenaList);
    anyDecommitted |= DecommitUnusedPagesFromArenaList(singlePageArenaList);

    return anyDecommitted;
}
PageHeap::SinglePageArena::SinglePageArena()
{
    // Push the arena's full list of pages onto the free page stack.
    topOfFreePageStack = -1;
    for (int i = 0; i < PAGES_PER_ARENA; i++)
    {
        ++topOfFreePageStack;
        freePageStack[topOfFreePageStack] = i;
    }
}
/*
 * Create a new memory arena of the given size and reserve or commit pages for it.
 * If type == LargeAllocation, set this up as a "large allocation" arena and commit
 * the memory. If not, just reserve the memory.
 */
PageHeap::PageArena * PageHeap::CreateArena(PageArenaType type, size_t sz)
{
    // Allocate an arena and reserve pages for it.
    PageArena * newArena = NULL;
    SinglePageArena * newSinglePageArena = NULL;

    if (type == SinglePageAllocation)
    {
        newSinglePageArena = new (zeromemory) SinglePageArena;

        if (!singlePageArenaList)
        {
            singlePageArenaList = singlePageArenaLast = newSinglePageArena;
        }
        else
        {
            // Add to front of arena list.
            newSinglePageArena->nextArena = singlePageArenaList;
            singlePageArenaList = newSinglePageArena;
        }

        newArena = newSinglePageArena;
    }
    else
    {
        newArena = new (zeromemory) PageArena;

        // Add to arena list. For efficiency, large allocation arenas are placed
        // at the end, but regular arenas at the beginning. This ensures that
        // regular allocations are almost always satisfied by the first arena in the list.
        if (!arenaList)
        {
            arenaList = arenaLast = newArena;
        }
        else if (type == LargeAllocation)
        {
            // Add to end of arena list.
            arenaLast->nextArena = newArena;
            arenaLast = newArena;
        }
        else
        {
            // Add to front of arena list.
            newArena->nextArena = arenaList;
            arenaList = newArena;
        }
    }

    newArena->pages = VirtualAlloc(0, sz, type == LargeAllocation ? MEM_COMMIT : MEM_RESERVE, PAGE_READWRITE);
    if (!newArena->pages)
    {
        VbThrow(GetLastHResultError());
    }

    if (newSinglePageArena)
    {
        // Also add the new SinglePageArena to our indexing data structures.
        VSASSERT(addressToSinglePageArenaMap.find(newSinglePageArena->pages) == addressToSinglePageArenaMap.end(),
            "We shouldn't already have an arena for this address");
        addressToSinglePageArenaMap[newSinglePageArena->pages] = newSinglePageArena;
        singlePageArenasWithFreePages.push(newSinglePageArena);
    }

    m_pageCurReserve += sz / pageSize;
    if (m_pageCurReserve > m_pageMaxReserve)
    {
        m_pageMaxReserve = m_pageCurReserve;
    }

    newArena->size = sz;
    newArena->type = type;

    return newArena;
}
/*
 * Find the arena that contains a particular pointer.
 */
PageHeap::PageArena * PageHeap::FindArena(const void * p)
{
    PageArena * arena;

    for (arena = arenaList; arena != NULL; arena = arena->nextArena)
    {
        if (arena->OwnsPage(p))
            return arena;
    }

    VSASSERT(0, "Invalid"); // Should have found the arena.
    return NULL;
}